Commit 98b6e7a7 authored by Lukas Wolf

removed debug printouts

parent e950002d
@@ -27,4 +27,5 @@ conv_analysis.ipynb
 ./images/*
 ./scripts/*
 ./src/*
-./Explain/*
\ No newline at end of file
+./Explain/*
+_FINAL_RUNS/
\ No newline at end of file
@@ -26,8 +26,8 @@ class Prediction_history:
     #@timing_decorator
     #@profile
     def on_epoch_end(self):
-        print("Enter test on epoch end:")
-        get_gpu_memory()
+        #print("Enter test on epoch end:")
+        #get_gpu_memory()
         with torch.no_grad():
             y_pred = []
             for batch, (x, y) in enumerate(self.dataloader):
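For context, the callback above accumulates per-batch predictions at the end of each epoch under torch.no_grad(). A minimal, self-contained sketch of that pattern; the class layout and attribute names here are assumptions for illustration, not the repository's exact code:

```python
import torch

class PredictionHistory:
    """Collects model predictions on a fixed dataloader after every epoch (illustrative sketch)."""
    def __init__(self, dataloader, model):
        self.dataloader = dataloader
        self.model = model
        self.epoch_predictions = []

    def on_epoch_end(self):
        # Inference only: no autograd graph is built or kept in memory.
        with torch.no_grad():
            y_pred = []
            for batch, (x, y) in enumerate(self.dataloader):
                y_pred.append(self.model(x).cpu())
        # Store one concatenated prediction tensor per epoch.
        self.epoch_predictions.append(torch.cat(y_pred))
```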
@@ -121,7 +121,8 @@ class BaseNet(nn.Module):
         curr_val_loss = sys.maxsize # For early stopping
         patience = 0
         for t in range(epochs):
-            logging.info(f"Epoch {t+1}\n-------------------------------")
+            logging.info("-------------------------------")
+            logging.info(f"Epoch {t+1}")
             # print(f"Start EPOCH: Free GPU memory: {get_gpu_memory()}")
             # print(f"memory {psutil.virtual_memory()}")
             # Run through training and test set
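The hunk above sits inside BaseNet's training routine: curr_val_loss starts at sys.maxsize and a patience counter drives early stopping, with the per-epoch banner now split over two logging calls. A standalone sketch of that early-stopping skeleton; the function signature, patience default, and the two callables are assumptions:

```python
import sys
import logging

def fit(train_one_epoch, validate_one_epoch, epochs, early_stopping_patience=20):
    """Run training with early stopping; the two callables stand in for the real loops."""
    curr_val_loss = sys.maxsize   # Best validation loss so far ("infinity" to start)
    patience = 0                  # Epochs since the last improvement
    for t in range(epochs):
        logging.info("-------------------------------")
        logging.info(f"Epoch {t+1}")
        train_one_epoch()
        val_loss = validate_one_epoch()
        if val_loss < curr_val_loss:
            curr_val_loss = val_loss
            patience = 0
        else:
            patience += 1
            if patience >= early_stopping_patience:
                logging.info("Early stopping: validation loss stopped improving")
                break
```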
@@ -61,13 +61,13 @@ class ConvNet(ABC, BaseNet):
         # Stack the modules
         shortcut_cnt = 0
         for d in range(self.depth):
-            print(f"x after block {d} {x.size()}")
+            #print(f"x after block {d} {x.size()}")
            x = self.conv_blocks[d](x)
            if self.use_residual and d % 3 == 2:
                res = self.shortcuts[shortcut_cnt](input_res)
                shortcut_cnt += 1
-                print(f"x before add {x.size()}")
-                print(f"res before add {res.size()}")
+                #print(f"x before add {x.size()}")
+                #print(f"res before add {res.size()}")
                x = torch.add(x, res)
                x = nn.functional.relu(x)
                input_res = x
@@ -75,11 +75,11 @@ class ConvNet(ABC, BaseNet):
         x = self.gap_layer_pad(x)
         x = self.gap_layer(x)
-        print(f"x after gap {x.size()}")
+        #print(f"x after gap {x.size()}")
         x = x.view(self.batch_size, -1)
-        print(f"x before output {x.size()}")
+        #print(f"x before output {x.size()}")
         output = self.output_layer(x) # Defined in BaseNet
         return output
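The forward pass shown above adds a residual shortcut after every third conv block (d % 3 == 2), then applies global average pooling, flattens, and feeds the output layer. A simplified sketch of that wiring, assuming 1D conv blocks and a toy channel count; none of the module definitions below are the repository's actual ones:

```python
import torch
import torch.nn as nn

class TinyConvNet(nn.Module):
    def __init__(self, depth=6, channels=16, n_outputs=1, use_residual=True):
        super().__init__()
        self.depth = depth
        self.use_residual = use_residual
        self.conv_blocks = nn.ModuleList(
            [nn.Conv1d(channels, channels, kernel_size=3, padding=1) for _ in range(depth)]
        )
        # One 1x1 convolution shortcut per group of three conv blocks.
        self.shortcuts = nn.ModuleList(
            [nn.Conv1d(channels, channels, kernel_size=1) for _ in range(depth // 3)]
        )
        self.gap_layer = nn.AdaptiveAvgPool1d(1)   # global average pooling over the time axis
        self.output_layer = nn.Linear(channels, n_outputs)

    def forward(self, x):
        input_res = x            # input to the current residual group
        shortcut_cnt = 0
        for d in range(self.depth):
            x = self.conv_blocks[d](x)
            if self.use_residual and d % 3 == 2:
                res = self.shortcuts[shortcut_cnt](input_res)
                shortcut_cnt += 1
                x = torch.add(x, res)              # add the shortcut branch
                x = nn.functional.relu(x)
                input_res = x                      # next group starts from the merged tensor
        x = self.gap_layer(x)                      # (batch, channels, 1)
        x = x.view(x.size(0), -1)                  # flatten to (batch, channels)
        return self.output_layer(x)
```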
@@ -17,8 +17,8 @@ def create_dataloader(X, y, batch_size, mode):
         logging.info(f"Unsqueeze data for eegnet")
         tensor_x = tensor_x.unsqueeze(1)
     # Log the shapes
-    logging.info(f"Tensor x {mode} size: {tensor_x.size()}")
-    logging.info(f"Tensor y {mode} size: {tensor_y.size()}")
+    #logging.info(f"Tensor x {mode} size: {tensor_x.size()}")
+    #logging.info(f"Tensor y {mode} size: {tensor_y.size()}")
     # Set device
     #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
     # Create dataset and dataloader
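For reference, create_dataloader appears to wrap arrays in tensors, add a channel dimension for EEGNet-style models, and return a PyTorch DataLoader. A hedged sketch under those assumptions; the unsqueeze condition, the model_name parameter, and the shuffle/drop_last choices are guesses based only on the visible lines:

```python
import logging
import torch
from torch.utils.data import TensorDataset, DataLoader

def create_dataloader(X, y, batch_size, mode, model_name="cnn"):
    tensor_x = torch.as_tensor(X, dtype=torch.float32)
    tensor_y = torch.as_tensor(y, dtype=torch.float32)
    if model_name == "eegnet":
        # EEGNet-style models expect an explicit channel dimension: (N, 1, electrodes, samples)
        logging.info("Unsqueeze data for eegnet")
        tensor_x = tensor_x.unsqueeze(1)
    dataset = TensorDataset(tensor_x, tensor_y)
    # Shuffle only the training split so validation order stays deterministic.
    return DataLoader(dataset, batch_size=batch_size, shuffle=(mode == "train"), drop_last=True)
```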
@@ -8,15 +8,18 @@ from memory_profiler import profile
 #import torch.profiler
 #@timing_decorator
-@profile
+#@profile
 def train_loop(dataloader, model, loss_fn, optimizer):
     """
     Performs one epoch of training the model through the dataset stored in dataloader
     Using the given loss_fn and optimizer
     Returns training loss of the epoch to be tracked by the caller
     """
+    #print("Enter training:")
+    #get_gpu_memory()
+    #print(torch.cuda.memory_summary())
     size = len(dataloader.dataset)
-    print(f"size {size}")
     training_loss, correct = 0, 0
     for batch, (X, y) in enumerate(dataloader):
         # Move tensors to GPU
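This commit disables the memory_profiler @profile decorators by commenting them out. One alternative worth noting, purely as a sketch and not what the repository does, is a no-op fallback so line-by-line memory profiling can be toggled with an environment variable instead of editing every decorator site:

```python
import os

if os.environ.get("PROFILE_MEMORY") == "1":
    from memory_profiler import profile      # real line-by-line memory profiler when enabled
else:
    def profile(func):
        """No-op replacement so @profile costs nothing when profiling is disabled."""
        return func

@profile
def train_loop(dataloader, model, loss_fn, optimizer):
    ...
```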
@@ -32,8 +35,13 @@ def train_loop(dataloader, model, loss_fn, optimizer):
         optimizer.step()
         # Add up metrics
         training_loss += loss.item()
-        pred = (pred > 0.5).float()
-        correct += (pred == y).float().sum()
+        if config['task'] == 'prosaccade-clf':
+            pred = (pred > 0.5).float()
+            correct += (pred == y).float().sum()
+        # Remove batch from gpu
+        del X
+        del y
+        torch.cuda.empty_cache()
     loss = training_loss / size
     logging.info(f"Avg training loss: {loss:>7f}")
@@ -44,12 +52,16 @@ def train_loop(dataloader, model, loss_fn, optimizer):
     return float(loss), -1
 #@timing_decorator
-@profile
+#@profile
 def validation_loop(dataloader, model, loss_fn):
     """
     Performs one prediction run through the test set stored in the dataloader
     Prints the loss function computed with the prediction pred and the labels y
     """
+    #print("Enter validation:")
+    #get_gpu_memory()
+    #print(torch.cuda.memory_summary())
     size = len(dataloader.dataset)
     val_loss, correct = 0, 0
     with torch.no_grad():
@@ -65,6 +77,10 @@ def validation_loop(dataloader, model, loss_fn):
             if config['task'] == 'prosaccade-clf':
                 pred = (pred > 0.5).float()
                 correct += (pred == y).float().sum()
+            # Remove batch from gpu
+            del X
+            del y
+            torch.cuda.empty_cache()
     loss = val_loss / size
     logging.info(f"Avg validation loss: {loss:>8f}")
@@ -30,7 +30,7 @@ def timing_decorator(func):
     return wrapper
-@profile
+#@profile
 def compute_loss(loss_fn, dataloader, pred_list, nb_models):
     """
     Computes the loss across all batches between the true labels in the dataloader and the batch predictions in pred_list
@@ -60,7 +60,7 @@ def compute_loss(loss_fn, dataloader, pred_list, nb_models):
         loss.append(loss_fn(y, pred) / config['batch_size'])
     return sum(loss) / len(loss)
-@profile
+#@profile
 def compute_accuracy(dataloader, pred_list, nb_models):
     """
     Computes the accuracy across al batches between the true labels in the dataloader and the batch predictions in pred_list
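compute_loss and compute_accuracy evaluate per-batch predictions (pred_list, presumably one prediction list per ensemble member) against the labels from the dataloader. A rough sketch of what batch-wise averaging could look like; the division by the batch size follows the visible line, while the pred_list layout, the loss_fn(y, pred) argument order, and the accuracy threshold are assumptions:

```python
import torch

def compute_loss(loss_fn, dataloader, pred_list, nb_models, batch_size):
    """Average loss over all batches, with predictions averaged across the ensemble."""
    losses = []
    for batch, (x, y) in enumerate(dataloader):
        # Mean prediction of the nb_models ensemble members for this batch.
        pred = torch.stack([pred_list[m][batch] for m in range(nb_models)]).mean(dim=0)
        losses.append(loss_fn(y, pred) / batch_size)
    return sum(losses) / len(losses)

def compute_accuracy(dataloader, pred_list, nb_models):
    """Binary accuracy of the ensemble-averaged prediction, thresholded at 0.5."""
    correct, total = 0.0, 0
    for batch, (x, y) in enumerate(dataloader):
        pred = torch.stack([pred_list[m][batch] for m in range(nb_models)]).mean(dim=0)
        pred = (pred > 0.5).float()
        correct += (pred == y).float().sum().item()
        total += y.numel()
    return correct / total
```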