Commit 498e8fca authored by Lukas Wolf

removed device and removed shortcut init if use_residual=False

parent 805f1463
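
In words: the commit stops threading an explicit device object through the classes, relying on torch.cuda.is_available() checks and .cuda() calls instead, and it only builds the residual-shortcut modules when use_residual is set. A minimal standalone sketch of the device-handling change (the model and tensor names are illustrative, not from the repository):

import torch
import torch.nn as nn

model = nn.Linear(8, 1)
x = torch.randn(4, 8)

# Before this commit: an explicit device object is created once and passed around.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
y_before = model.to(device)(x.to(device))

# After this commit: no stored device; model and batches are moved ad hoc.
if torch.cuda.is_available():
    model = model.cuda()
    x = x.cuda()
y_after = model(x)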
@@ -15,10 +15,9 @@ class Prediction_history:
     Collect predictions of the given validation set after each epoch
     predhis is a list of lists (one for each epoch) of tensors (one for each batch)
     """
-    def __init__(self, dataloader, device, model) -> None:
+    def __init__(self, dataloader, model) -> None:
         self.dataloader = dataloader
         self.predhis = []
-        self.device = device
         self.model = model

     #@timing_decorator
@@ -26,16 +25,23 @@ class Prediction_history:
     def on_epoch_end(self):
         with torch.no_grad():
             y_pred = []
-            for x, y in self.dataloader:
+            for batch, (x, y) in enumerate(self.dataloader):
                 # Move batch to GPU
                 if torch.cuda.is_available():
                     x = x.cuda()
                     y = y.cuda()
-                y_pred.append(self.model(x))
+                pred = self.model(x)
+                y_pred.append(pred)
+                if batch==0:
+                    cat = torch.cat((y, pred), dim=1)
+                    print(f"COMPARE Y AND PRED")
+                    print(cat)
                 # Remove batch from GPU
                 del x
                 del y
                 #torch.cuda.empty_cache()
             self.predhis.append(y_pred)
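
For context, a hedged sketch of how Prediction_history is driven after this change: it is now constructed without a device argument, and on_epoch_end() is called once per epoch to snapshot validation predictions (the loader and model below are toy stand-ins, not the repository's):

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins for the real validation loader and model.
val_set = TensorDataset(torch.randn(32, 8), torch.randn(32, 1))
val_loader = DataLoader(val_set, batch_size=8)
model = nn.Linear(8, 1)
if torch.cuda.is_available():
    model = model.cuda()

history = Prediction_history(dataloader=val_loader, model=model)  # no device argument anymore
for epoch in range(3):
    # ... one epoch of training would run here ...
    history.on_epoch_end()

# predhis holds one list per epoch, with one prediction tensor per batch.
print(len(history.predhis), len(history.predhis[0]))  # 3 4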
 class BaseNet(nn.Module):
@@ -55,10 +61,10 @@ class BaseNet(nn.Module):
         self.batch_size = batch_size
         self.nb_channels = self.input_shape[1]
         self.timesamples = self.input_shape[0]
-        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        #self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         # Create output layer depending on task and
-        if config['task'] == 'prosaccade_clf':
+        if config['task'] == 'prosaccade-clf':
             self.loss_fn = nn.BCELoss()
             self.output_layer = nn.Sequential(
                 nn.Linear(in_features=self.get_nb_features_output_layer(), out_features=1),
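
The hunk is cut off after the Linear layer, but nn.BCELoss expects probabilities in [0, 1], so the prosaccade-clf head presumably ends in a sigmoid. A self-contained sketch of such a head (the feature count is a placeholder for get_nb_features_output_layer()):

import torch
import torch.nn as nn

n_features = 128  # placeholder value
output_layer = nn.Sequential(
    nn.Linear(in_features=n_features, out_features=1),
    nn.Sigmoid(),  # maps logits into [0, 1], as BCELoss requires
)
loss_fn = nn.BCELoss()

x = torch.randn(4, n_features)
target = torch.randint(0, 2, (4, 1)).float()
loss = loss_fn(output_layer(x), target)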
@@ -76,6 +82,9 @@ class BaseNet(nn.Module):
                 nn.Linear(in_features=self.get_nb_features_output_layer(), out_features=1)
             )
+        if verbose:
+            logging.info(f"Using loss fct: {self.loss_fn}")

     # abstract method
     def forward(self, x):
         """
@@ -94,14 +103,12 @@
     def _split_model(self):
         pass

-    @profile
-    @timing_decorator
+    #@profile
+    #@timing_decorator
     def fit(self, train_dataloader, test_dataloader, subjectID=None):
         """
         Fit the model on the dataset defined by data x and labels y
         """
         logging.info("------------------------------------------------------------------------------------")
         logging.info(f"Fitting model number {self.model_number}")
         # Move the model to GPU
         if torch.cuda.is_available():
             self.cuda()
@@ -114,22 +121,20 @@
         epochs = config['epochs']
         for t in range(epochs):
             logging.info(f"Epoch {t+1}\n-------------------------------")
-            print(f"Start EPOCH: Free GPU memory: {get_gpu_memory()}")
-            print(f"memory {psutil.virtual_memory()}")
+            # print(f"Start EPOCH: Free GPU memory: {get_gpu_memory()}")
+            # print(f"memory {psutil.virtual_memory()}")
             # Run through training and test set
-            print(self.device)
-            train_loop(train_dataloader, self.float(), self.loss_fn, optimizer, self.device)
-            print(f"Free GPU mem after train loop: {get_gpu_memory()}")
-            print(f"memory {psutil.virtual_memory()}")
-            test_loop(test_dataloader, self.float(), self.loss_fn, self.device)
-            print("Free GPU mem after test loop:")
-            print(f"memory {psutil.virtual_memory()}")
+            train_loop(train_dataloader, self.float(), self.loss_fn, optimizer)
+            # print(f"Free GPU mem after train loop: {get_gpu_memory()}")
+            # print(f"memory {psutil.virtual_memory()}")
+            test_loop(test_dataloader, self.float(), self.loss_fn)
+            # print("Free GPU mem after test loop:")
+            # print(f"memory {psutil.virtual_memory()}")
             # Add the predictions on the validation set
             prediction_ensemble.on_epoch_end()
-            print("Free GPU mem after prediction hist:")
-            print(f"memory {psutil.virtual_memory()}")
+            # print("Free GPU mem after prediction hist:")
+            # print(f"memory {psutil.virtual_memory()}")
         # Done with training this model
         logging.info(f"Finished model number {self.model_number}")
         if config['save_models'] and self.model_number==0:
             ckpt_dir = config['model_dir'] + '/best_models/' + config['model'] + '_nb_{}_'.format(self.model_number) + 'best_model.pth'
             torch.save(self.state_dict(), ckpt_dir)
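
train_loop and test_loop are defined elsewhere in the repository; after this commit they no longer receive a device argument, so they presumably move each batch themselves, mirroring Prediction_history.on_epoch_end above. A hedged sketch of what the training loop could look like under that assumption (not the repository's actual code):

import torch

def train_loop(dataloader, model, loss_fn, optimizer):
    # Assumed shape after the device parameter was dropped: each batch
    # is moved to the GPU inside the loop, if one is available.
    for x, y in dataloader:
        if torch.cuda.is_available():
            x, y = x.cuda(), y.cuda()
        pred = model(x)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()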
@@ -32,7 +32,8 @@ class ConvNet(ABC, BaseNet):
         # Define all the convolutional and shortcut modules that we will need in the model
         self.conv_blocks = nn.ModuleList([self._module(d) for d in range(self.depth)])
-        self.shortcuts = nn.ModuleList([self._shortcut(d) for d in range(int(self.depth / 3))])
+        if self.use_residual:
+            self.shortcuts = nn.ModuleList([self._shortcut(d) for d in range(int(self.depth / 3))])
         self.gap_layer = nn.AvgPool1d(kernel_size=2, stride=1)
         self.gap_layer_pad = Pad_Pool(left=0, right=1, value=0)
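
Guarding the construction with use_residual implies that forward() must also apply the shortcuts conditionally, since self.shortcuts does not exist otherwise. A hedged sketch of the matching forward logic (attribute names follow the diff; the actual forward implementation is not shown in this commit):

def forward(self, x):
    # Sketch: apply a residual shortcut every third conv block, but only
    # when the shortcut modules were actually created in __init__.
    shortcut_input = x
    for d, block in enumerate(self.conv_blocks):
        x = block(x)
        if self.use_residual and d % 3 == 2:
            x = x + self.shortcuts[d // 3](shortcut_input)
            shortcut_input = x
    return x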