
Commit 0bb86b9a authored by Lukas Wolf

commit to pull for local computation

parent f6e09f04
@@ -40,7 +40,7 @@ config['root_dir'] = '.'
# Choose task and dataset
##################################################################
-config['preprocessing'] = 'min' # options: min and max
+config['preprocessing'] = 'max' # options: min and max
#config['task'] = 'prosaccade-clf'
config['task'] = 'gaze-reg'
@@ -82,9 +82,9 @@ with open('hyperparams.json', 'r') as file:
params = json.load(file)
config['learning_rate'] = params[config['model']][config['task']]['learning_rate']
config['regularization'] = params[config['model']][config['task']]['regularization']
-config['epochs'] = 50
+config['epochs'] = 75
config['batch_size'] = 64
-config['early_stopping'] = False
+config['early_stopping'] = True
config['patience'] = 10
##################################################################
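Reviewer note: the nested lookups above (`params[config['model']][config['task']][...]`) imply a `hyperparams.json` shaped roughly like the sketch below. This is a minimal sketch: the model key `"cnn"` and all numeric values are assumptions, only the task names appear in this commit.

```python
import json

# Hypothetical hyperparams.json content implied by the lookups above.
# The "cnn" key and the numeric values are assumptions for illustration.
params = json.loads("""
{
  "cnn": {
    "gaze-reg":       {"learning_rate": 1e-4, "regularization": 1e-4},
    "prosaccade-clf": {"learning_rate": 1e-4, "regularization": 1e-4}
  }
}
""")
print(params["cnn"]["gaze-reg"]["learning_rate"])  # 0.0001
```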
@@ -12,7 +12,6 @@ from torch_models.torch_utils.utils import get_gpu_memory
import psutil
from torch_models.torch_utils.utils import timing_decorator
-from memory_profiler import profile
class Prediction_history:
"""
Collect predictions of the given validation set after each epoch
@@ -37,6 +36,16 @@ class Prediction_history:
y = y.cuda()
pred = self.model(x)
y_pred.append(pred)
+if batch == 0:
+    loss = nn.MSELoss()
+    cat = torch.cat((y, pred), dim=1)
+    batch_loss = loss(y, pred)
+    print("true label vs. pred")
+    print(cat)
+    print("batch loss")
+    print(batch_loss)
# Remove batch from GPU
del x
del y
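For reference, a standalone sketch of what the new batch-0 debug output produces, using dummy tensors (the single-output shape is an assumption; the target dimensionality of the gaze regression task is not shown in this hunk):

```python
import torch
import torch.nn as nn

y = torch.tensor([[0.1], [0.4]])     # dummy true labels, shape (batch, 1)
pred = torch.tensor([[0.2], [0.3]])  # dummy model outputs, same shape
cat = torch.cat((y, pred), dim=1)    # shape (batch, 2): each row is [true label, prediction]
batch_loss = nn.MSELoss()(pred, y)   # mean squared error over the batch
print(cat)                           # tensor([[0.1000, 0.2000], [0.4000, 0.3000]])
print(batch_loss)                    # tensor(0.0100)
```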
@@ -60,6 +69,7 @@ class BaseNet(nn.Module):
self.batch_size = batch_size
self.timesamples = self.input_shape[0]
self.nb_channels = self.input_shape[1]
+self.early_stopped = False # Training is skipped for the rest of the run once this is set to True
# Create output layer depending on task and
if config['task'] == 'prosaccade-clf':
@@ -80,7 +90,7 @@
nn.Linear(in_features=self.get_nb_features_output_layer(), out_features=1)
)
-if verbose:
+if verbose and self.model_number == 0:
logging.info(f"Using loss fct: {self.loss_fn}")
# abstract method
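Reviewer note: the guard above suppresses duplicate setup logs in an ensemble, so only the first member reports the loss function. A minimal standalone sketch of the pattern (the ensemble size and loss name are placeholders):

```python
import logging
logging.basicConfig(level=logging.INFO)

for model_number in range(5):       # hypothetical ensemble of 5 models
    loss_name = "MSELoss"           # stand-in for self.loss_fn
    if model_number == 0:           # mirrors `verbose and self.model_number == 0`
        logging.info(f"Using loss fct: {loss_name}")
```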
@@ -126,27 +136,28 @@ class BaseNet(nn.Module):
# print(f"Start EPOCH: Free GPU memory: {get_gpu_memory()}")
# print(f"memory {psutil.virtual_memory()}")
# Run through training and test set
-train_loss_epoch, train_acc_epoch = train_loop(train_dataloader, self.float(), self.loss_fn, optimizer)
-# print(f"Free GPU mem after train loop: {get_gpu_memory()}")
-# print(f"memory {psutil.virtual_memory()}")
-val_loss_epoch, val_acc_epoch = validation_loop(validation_dataloader, self.float(), self.loss_fn)
-# Add them for later printout
-metrics['train_loss'].append(train_loss_epoch)
-metrics['val_loss'].append(val_loss_epoch)
-if config['task'] == 'prosaccade-clf':
-    metrics['train_acc'].append(train_acc_epoch)
-    metrics['val_acc'].append(val_acc_epoch)
-# print("Free GPU mem after test loop:")
-# print(f"memory {psutil.virtual_memory()}")
-# Add the predictions on the validation set
+if not self.early_stopped:
+    train_loss_epoch, train_acc_epoch = train_loop(train_dataloader, self.float(), self.loss_fn, optimizer)
+    # print(f"Free GPU mem after train loop: {get_gpu_memory()}")
+    # print(f"memory {psutil.virtual_memory()}")
+    val_loss_epoch, val_acc_epoch = validation_loop(validation_dataloader, self.float(), self.loss_fn)
+    # Add them for later printout
+    metrics['train_loss'].append(train_loss_epoch)
+    metrics['val_loss'].append(val_loss_epoch)
+    if config['task'] == 'prosaccade-clf':
+        metrics['train_acc'].append(train_acc_epoch)
+        metrics['val_acc'].append(val_acc_epoch)
+    # print("Free GPU mem after test loop:")
+    # print(f"memory {psutil.virtual_memory()}")
+# Add the predictions on the validation set, even if the model was early stopped
prediction_ensemble.on_epoch_end()
# print("Free GPU mem after prediction hist:")
# print(f"memory {psutil.virtual_memory()}")
# Implementation of early stopping
-if config['early_stopping']:
+if config['early_stopping'] and not self.early_stopped:
if patience > config['patience']:
logging.info(f"Early stopping the model after {t} epochs")
-break
+self.early_stopped = True
if val_loss_epoch > curr_val_loss:
patience +=1
else:
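Reviewer note: the key change in this hunk is that the model no longer `break`s out of the epoch loop on early stopping; a flag freezes training while the loop keeps running, so the per-epoch prediction hook still fires. A minimal runnable sketch of the pattern (the hunk truncates at `else:`, so the reset-on-improvement branch below is an assumption):

```python
import random

patience_limit = 3          # stand-in for config['patience']
patience = 0
early_stopped = False
curr_val_loss = float("inf")
prediction_history = []     # stand-in for prediction_ensemble

for epoch in range(10):
    if not early_stopped:
        val_loss = random.random()      # stand-in for a real validation loop
        if val_loss > curr_val_loss:
            patience += 1
        else:                           # assumed reset branch, truncated in the diff
            curr_val_loss = val_loss
            patience = 0
        if patience > patience_limit:
            early_stopped = True        # freeze training instead of `break`
    prediction_history.append(epoch)    # per-epoch hook fires even after stopping

print(f"stopped early: {early_stopped}, epochs recorded: {len(prediction_history)}")
```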
@@ -29,6 +29,7 @@ class ConvNet(ABC, BaseNet):
self.kernel_size = kernel_size
self.nb_filters = nb_filters
self.preprocessing = preprocessing
+self.input_size = (batch_size, input_shape[1], input_shape[0]) # needed for model summary in BaseNet
# Define all the convolutional and shortcut modules that we will need in the model
self.conv_blocks = nn.ModuleList([self._module(d) for d in range(self.depth)])
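Reviewer note: the new `input_size` attribute is commented as feeding a model summary in BaseNet, though the summary call itself is not shown in this commit. One plausible reading (an assumption) is a utility like `torchinfo.summary`, which takes the full batch-inclusive shape. A minimal sketch with a hypothetical stand-in network and hypothetical EEG dimensions:

```python
from torch import nn
from torchinfo import summary  # assumption: not confirmed by this diff

class TinyNet(nn.Module):      # hypothetical stand-in for ConvNet
    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size  # (batch_size, nb_channels, timesamples)
        self.conv = nn.Conv1d(input_size[1], 16, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

net = TinyNet((64, 129, 500))  # hypothetical: batch 64, 129 channels, 500 samples
summary(net, input_size=net.input_size)  # prints a per-layer shape/parameter table
```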
@@ -46,7 +47,6 @@
logging.info('--------------- kernel size : ' + str(self.kernel_size))
logging.info('--------------- nb filters : ' + str(self.nb_filters))
logging.info('--------------- preprocessing: ' + str(self.preprocessing))
-print(self)
def forward(self, x):
@@ -26,6 +26,8 @@ class EEGNet(BaseNet):
self.F2 = F2
self.kernel_size = kernel_size
self.dropout_rate = dropout_rate
+self.input_size = (batch_size, 1, input_shape[1], input_shape[0]) # needed for model summary in BaseNet
super().__init__(input_shape=input_shape, epochs=epochs, model_number=model_number, batch_size=batch_size)
# Block 1: 2dconv and depthwise conv