To receive notifications about scheduled maintenance, please subscribe to the mailing-list gitlab-operations@sympa.ethz.ch. You can subscribe to the mailing-list at https://sympa.ethz.ch

Commit a9ae96b9 authored by Lukas Wolf's avatar Lukas Wolf
Browse files

added json file to set hyperparams for every model/task combination

parent d679800a
......@@ -2,6 +2,7 @@
# let's keep it here to have a clean code on other methods that we try
import time
import os
import json
#from Clusters.cluster import clustering as clustering
#from Clusters.cluster2 import clustering as clustering2
#from Clusters.cluster3 import clustering as clustering3
......@@ -60,13 +61,13 @@ config['framework'] = 'pytorch'
# Choose model
##################################################################
config['ensemble'] = 3 #number of models in the ensemble
config['ensemble'] = 2 #number of models in the ensemble
config['pretrained'] = False # We can use a model pretrained on processing speed task
#config['model'] = 'cnn'
config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'eegnet'
config['model'] = 'xception'
#config['model'] = 'xception'
#config['model'] = 'pyramidal_cnn'
#config['model'] = 'deepeye'
#config['model'] = 'deepeye-rnn'
......@@ -74,11 +75,13 @@ config['model'] = 'xception'
#config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this model
##################################################################
# Hyper-parameters and training configuration.
# Hyper-parameters and training configuration. Set them in hyperparams.json
##################################################################
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, for inception on angle 1e-5
config['regularization'] = 0 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 3
with open('hyperparams.json', 'r') as file:
params = json.load(file)
config['learning_rate'] = params[config['model']][config['task']]['learning_rate']
config['regularization'] = params[config['model']][config['task']]['regularization']
config['epochs'] = 15
config['batch_size'] = 64
##################################################################
......@@ -203,9 +206,8 @@ timestamp = str(int(time.time()))
model_folder_name = timestamp + "_pretrained_" + config['model'] if config['pretrained'] else timestamp + "_" + config['model']
# Modify the model folder name depending on which task runs
model_folder_name += "_" + config['task']
#if config['task'] != 'prosaccade-clf':
# model_folder_name += '_' + config['data_mode']
# model_folder_name += '_' + config['dataset']
model_folder_name += "_prep" + config['preprocessing']
if config['split']:
model_folder_name += '_cluster'
......
{
"cnn": {
"prosaccade-clf": {
"learning_rate": 1e-3,
"regularization": 0
},
"gaze-reg": {
"learning_rate": 1e-3,
"regularization": 0
},
"angle-reg": {
"learning_rate": 1e-3,
"regularization": 0
}
},
"xception": {
"prosaccade-clf": {
"learning_rate": 1e-3,
"regularization": 0
},
"gaze-reg": {
"learning_rate": 1e-3,
"regularization": 0
},
"angle-reg": {
"learning_rate": 1e-3,
"regularization": 0
}
},
"inception": {
"prosaccade-clf": {
"learning_rate": 1e-3,
"regularization": 0
},
"gaze-reg": {
"learning_rate": 1e-3,
"regularization": 0
},
"angle-reg": {
"learning_rate": 1e-3,
"regularization": 0
}
},
"eegnet": {
"prosaccade-clf": {
"learning_rate": 1e-3,
"regularization": 0
},
"gaze-reg": {
"learning_rate": 1e-3,
"regularization": 0
},
"angle-reg": {
"learning_rate": 1e-3,
"regularization": 0
}
},
"pyramidal_cnn": {
"prosaccade-clf": {
"learning_rate": 1e-3,
"regularization": 0
},
"gaze-reg": {
"learning_rate": 1e-3,
"regularization": 0
},
"angle-reg": {
"learning_rate": 1e-3,
"regularization": 0
}
}
}
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment