To receive notifications about scheduled maintenance, please subscribe to the mailing-list gitlab-operations@sympa.ethz.ch. You can subscribe to the mailing-list at https://sympa.ethz.ch

Commit a56c05f4 authored by Lukas Wolf's avatar Lukas Wolf
Browse files

add plots and save metrics for all models of ensemble

parent 668405fc
......@@ -40,7 +40,7 @@ config['root_dir'] = '.'
# Choose task and dataset
##################################################################
config['preprocessing'] = 'min' # options: min and max
config['preprocessing'] = 'max' # options: min and max
config['task'] = 'prosaccade-clf'
#config['task'] = 'gaze-reg'
......@@ -64,11 +64,14 @@ config['framework'] = 'pytorch'
config['ensemble'] = 2 #number of models in the ensemble
config['pretrained'] = False # We can use a model pretrained on processing speed task
config['model'] = 'cnn'
# MODELS FOR BENCHMARK
#config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'eegnet'
config['model'] = 'eegnet'
#config['model'] = 'xception'
#config['model'] = 'pyramidal_cnn'
# EXPERIMENTAL MODELS
#config['model'] = 'deepeye'
#config['model'] = 'deepeye-rnn'
#config['model'] = 'gazenet'
......@@ -81,8 +84,10 @@ with open('hyperparams.json', 'r') as file:
params = json.load(file)
config['learning_rate'] = params[config['model']][config['task']]['learning_rate']
config['regularization'] = params[config['model']][config['task']]['regularization']
config['epochs'] = 15
config['epochs'] = 5
config['batch_size'] = 64
config['early_stopping'] = False
config['patience'] = 20
##################################################################
# Choose between ensemble and kerasTuner
......@@ -202,13 +207,13 @@ else:
# Create a unique output directory for this experiment.
timestamp = str(int(time.time()))
#model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + config['model']
model_folder_name = timestamp + "_pretrained_" + config['model'] if config['pretrained'] else timestamp + "_" + config['model']
model_folder_name = timestamp
model_folder_name += "_tf" if config['framework']=='tensorflow' else '_pytorch'
model_folder_name += "_pretrained_" + config['model'] if config['pretrained'] else "_" + config['model']
# Modify the model folder name depending on which task runs
model_folder_name += "_" + config['task']
model_folder_name += "_prep" + config['preprocessing']
if config['split']:
model_folder_name += '_cluster'
if config['downsampled']:
......@@ -229,9 +234,14 @@ if config['tensorboard_on']:
if not os.path.exists(config['tensorboard_log_dir']):
os.makedirs(config['tensorboard_log_dir'])
config['best_models_dir'] = config['model_dir'] + "/best_models/"
if not os.path.exists(config['best_models_dir']):
os.makedirs(config['best_models_dir'])
if not os.path.exists(config['model_dir'] + "/best_models/"):
os.makedirs(config['model_dir'] + "/best_models/")
if not os.path.exists(config['model_dir'] + "/plots/"):
os.makedirs(config['model_dir'] + "/plots/")
if not os.path.exists(config['model_dir'] + "/metrics/"):
os.makedirs(config['model_dir'] + "/metrics/")
# Save config to model dir
import pickle
......
......@@ -154,7 +154,7 @@ class BaseNet(nn.Module):
plot_metrics(metrics['train_acc'], metrics['val_acc'], output_dir=config['model_dir'], metric='accuracy', model_number=self.model_number)
# Save metrics
df = pd.DataFrame.from_dict(metrics)
df.to_csv(path_or_buf=config['model_dir'] + '/metrics.csv')
df.to_csv(path_or_buf=config['model_dir'] + '/metrics/' + 'metrics_model_nb_' + str(self.model_number) + '.csv')
if config['save_models']:
ckpt_dir = config['model_dir'] + '/best_models/' + config['model'] + '_nb_{}_'.format(self.model_number) + 'best_model.pth'
torch.save(self.state_dict(), ckpt_dir)
......
......@@ -60,11 +60,6 @@ class Ensemble_torch:
train_dataloader = create_dataloader(X_train, y_train, config['batch_size'], 'train')
validation_dataloader = create_dataloader(X_val, y_val, config['batch_size'], 'val')
test_dataloader = create_dataloader(X_test, y_test, config['batch_size'], 'test')
print(f"len train loader {len(train_dataloader.dataset)}")
print(f"len validation loader {len(validation_dataloader.dataset)}")
print(f"len test loader {len(test_dataloader.dataset)}")
# Metrics to save across the ensemble
loss=[]
accuracy=[]
......@@ -96,10 +91,10 @@ class Ensemble_torch:
config['model'] = config['model'] + '_cluster'
# Save metrics and plot them
np.savetxt(X=loss, fname=config['model_dir']+'/'+'ensemble_test_loss_val.csv', delimiter=',')
np.savetxt(X=loss, fname=config['model_dir']+'/metrics/'+'ensemble_test_loss_val.csv', delimiter=',')
plot_array(loss, config['model_dir'], 'test loss')
if config['task'] == 'prosaccade-clf':
np.savetxt(X=accuracy, fname=config['model_dir']+'/'+'ensemble_test_acc_val.csv', delimiter=',')
np.savetxt(X=accuracy, fname=config['model_dir']+'/metrics/'+'ensemble_test_acc_val.csv', delimiter=',')
plot_array(accuracy, config['model_dir'], 'test accuracy')
def create_model(model_type, model_number):
......
......@@ -52,7 +52,7 @@ def plot_array(x, output_dir, metric, savefig=True):
else:
plt.ylabel('Binary Cross Entropy Loss')
if savefig:
plt.savefig(output_dir + '/plots' + config['model'] + '_val_' + metric + '.png')
plt.savefig(output_dir + '/plots/' + config['model'] + '_val_' + metric + '.png')
def plot_metrics(train, val, output_dir, metric, model_number=0 ,savefig=True):
"""
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment