
Commit b98a80dc authored by Lukas Wolf

remove debug printout

parent 1b6ff02c
@@ -14,7 +14,6 @@ models_scratch.py
/log/*
/images/*
/archive_runs/*
run.sh
retrain.sh
labels.txt
preds.txt
@@ -28,4 +27,7 @@ conv_analysis.ipynb
./scripts/*
./src/*
./Explain/*
- _FINAL_RUNS/
\ No newline at end of file
+ _MAX_FINAL_RUNS/
+ Compute_Ensemble_Metrics.py
+ _FINAL_EXP/
+ model_load.ipynb
\ No newline at end of file
@@ -116,7 +116,7 @@ config['split'] = False
- def create_folder():
+ def create_folder(config):
##################################################################
# Manage the model directory and output directory structure
##################################################################
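For context, a minimal sketch of what the updated create_folder(config) might do, assuming one output directory per model and that derived paths are stored back on the config dict. The directory layout and the 'model_dir' key are assumptions; only config['model'] and config['info_log'] appear in the diff below.

import os

def create_folder(config):
    # Hypothetical layout: one output directory per model, derived from the
    # config that is now passed in explicitly instead of read implicitly.
    model_dir = os.path.join('./runs', config['model'])
    os.makedirs(model_dir, exist_ok=True)
    # Store derived paths on the config so the later logging setup can use them.
    config['model_dir'] = model_dir
    config['info_log'] = os.path.join(model_dir, 'info.log')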
@@ -61,7 +61,7 @@ def benchmark_task(task):
# MODELS FOR BENCHMARK
config['model'] = 'cnn'
- create_folder()
+ create_folder(config)
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
log_config()
@@ -72,7 +72,7 @@ def benchmark_task(task):
start_time = time.time()
config['model'] = 'inception'
- create_folder()
+ create_folder(config)
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
log_config()
@@ -83,7 +83,7 @@ def benchmark_task(task):
start_time = time.time()
config['model'] = 'eegnet'
- create_folder()
+ create_folder(config)
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
log_config()
@@ -94,14 +94,14 @@ def benchmark_task(task):
start_time = time.time()
config['model'] = 'xception'
- create_folder()
+ create_folder(config)
trainer = Trainer(config)
trainer.train()
logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))
start_time = time.time()
config['model'] = 'pyramidal_cnn'
- create_folder()
+ create_folder(config)
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
log_config()
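Each benchmark block above repeats the same sequence for a different architecture. A condensed sketch of that pattern, assuming config, create_folder, log_config and Trainer behave as shown in the diff; the loop itself is an illustration, not the repository's actual control flow.

import logging
import time

for model_name in ['cnn', 'inception', 'eegnet', 'xception', 'pyramidal_cnn']:
    start_time = time.time()
    config['model'] = model_name
    create_folder(config)  # now receives the config explicitly
    logging.basicConfig(filename=config['info_log'], level=logging.INFO)
    logging.info('Started the Logging')
    log_config()
    trainer = Trainer(config)
    trainer.train()
    logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))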
@@ -65,13 +65,9 @@ def validation_loop(dataloader, model, loss_fn):
with torch.no_grad():
for batch, (X, y) in enumerate(dataloader):
# Move tensors to GPU
- print(f"Validation: cuda available {torch.cuda.is_available()}")
if torch.cuda.is_available():
X = X.cuda()
y = y.cuda()
- print(f"model type {type(X)}")
- print(f"tensor type {type(X)}")
- print(f"tensor shape {X.size()}")
# Predict
pred = model(X)
# Compute metrics
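With the debug prints gone, the remaining loop just moves each batch to the GPU and runs the forward pass. A minimal sketch of the surviving structure, assuming a standard PyTorch DataLoader; the loss accumulation stands in for the "# Compute metrics" step and is not taken from the file.

import torch

def validation_loop(dataloader, model, loss_fn):
    total_loss = 0.0
    with torch.no_grad():
        for batch, (X, y) in enumerate(dataloader):
            # Move tensors to GPU; .cuda() returns a copy, so rebind the names
            if torch.cuda.is_available():
                X = X.cuda()
                y = y.cuda()
            # Predict
            pred = model(X)
            # Compute metrics (illustrative)
            total_loss += loss_fn(pred, y).item()
    return total_loss / len(dataloader)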
@@ -87,14 +87,9 @@ def sum_predictions(dataloader, model, model_number, prediction_list):
"""
with torch.no_grad():
for batch, (X, y) in enumerate(dataloader):
- print(f"cuda available {torch.cuda.is_available()}")
if torch.cuda.is_available():
X.cuda()
y.cuda()
- print(f"model type {type(X)}")
- print(f"tensor type {type(X)}")
- print(f"tensor shape {X.size()}")
- print(f"x on cuda {X.is_cuda()}")
pred = model(X)
if model_number == 0:
prediction_list.append(pred) # append the predicted tensor for each batch
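One detail this commit does not touch: in sum_predictions the remaining X.cuda() and y.cuda() calls are not assigned back, and torch.Tensor.cuda() returns a copy rather than moving the tensor in place, so X and y stay on the CPU. A sketch of the loop body with the assignment added; the change is an assumption about intent, not part of this commit.

import torch

with torch.no_grad():
    for batch, (X, y) in enumerate(dataloader):
        if torch.cuda.is_available():
            X = X.cuda()  # rebind: .cuda() returns a new tensor on the GPU
            y = y.cuda()
        pred = model(X)
        if model_number == 0:
            prediction_list.append(pred)  # append the predicted tensor for each batch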