Commit 10f5890a authored by Martyna Plomecka

Tuned EEGNet

parent b96aff1d
@@ -43,7 +43,8 @@ class ConvNet(ABC):
            self.model = self._split_model()
        else:
            self.model = self._build_model()
        self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        if self.verbose:
            self.model.summary()
@@ -62,7 +63,6 @@ class ConvNet(ABC):
        dense = tf.keras.layers.Dense(32, activation='relu')(x)
        output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
        model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        return model

    # abstract method
@@ -105,7 +105,6 @@ class ConvNet(ABC):
            return gap_layer
        output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
        model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        return model

    def get_model(self):
......
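Taken together, the ConvNet hunks above move the `model.compile(...)` call out of the model-building methods and into the constructor, so the model is compiled exactly once after it is built. A minimal sketch of the resulting constructor, assuming the attribute and method names visible in the diff (the `split` and `verbose` constructor arguments are assumptions):

```python
from abc import ABC, abstractmethod
from tensorflow import keras

class ConvNet(ABC):
    def __init__(self, split=False, verbose=True):
        # 'split' and 'verbose' as constructor arguments are assumptions;
        # only the placement of compile() is taken from the diff.
        self.split = split
        self.verbose = verbose
        if self.split:
            self.model = self._split_model()
        else:
            self.model = self._build_model()
        # Compiled once here instead of inside each builder method.
        self.model.compile(loss='binary_crossentropy',
                           optimizer=keras.optimizers.Adam(),
                           metrics=['accuracy'])
        if self.verbose:
            self.model.summary()

    @abstractmethod
    def _build_model(self):
        ...

    @abstractmethod
    def _split_model(self):
        ...
```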
@@ -70,6 +70,7 @@ class Classifier_EEGNet:
        self.model = self.build_model()
        if verbose:
            self.model.summary()
        self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])

    def split_model(self):
        """
@@ -139,11 +140,13 @@ class Classifier_EEGNet:
        dense = Dense(self.nb_classes, name='dense',
                      kernel_constraint=max_norm(self.norm_rate))(flatten)
        softmax = Activation('sigmoid', name='sigmoid')(dense)
        return Model(inputs=input1, outputs=softmax)
        return Model(inputs=input1, outputs=softmax)

    def get_model(self):
        return self.model

    def fit(self, eegnet_x, y):
        self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
        early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
        ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
......
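In `Classifier_EEGNet.fit()` above, the model is compiled just before training and a CSV logger, early stopping, and a checkpoint path are prepared. A minimal sketch of how these pieces are typically wired into `model.fit`, assuming the `config` keys shown in the diff; the `ModelCheckpoint` call, `validation_split`, and `batch_size` are assumptions, not lines from this commit:

```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import CSVLogger

def fit(self, eegnet_x, y):
    # `config` is assumed to be the project's configuration dict, imported elsewhere.
    # Re-compile right before training, as in the hunk above.
    self.model.compile(loss='binary_crossentropy',
                       optimizer=keras.optimizers.Adam(),
                       metrics=['accuracy'])
    csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
    ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
    # Assumed: keep only the best weights seen so far at ckpt_dir.
    ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, monitor='val_accuracy',
                                              save_best_only=True)
    # validation_split and batch_size are placeholders, not taken from the diff.
    return self.model.fit(eegnet_x, y, validation_split=0.2, batch_size=64,
                          epochs=self.epochs,
                          callbacks=[csv_logger, early_stop, ckpt])
```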
@@ -42,7 +42,7 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
"""
# Choosing model
config['model'] = 'xception'
config['model'] = 'eegnet'
config['downsampled'] = False
config['split'] = False
config['cluster'] = clustering()
......
@@ -40,9 +40,12 @@ def build_model(hp):
                                          preprocessing=False
                                          )
    elif config['model'] == 'eegnet':
        classifier = Classifier_EEGNet(dropoutRate = 0.5, kernLength = 250, F1 = 16,
                                       D = 4, F2 = 256, norm_rate = 0.5, dropoutType = 'Dropout',
                                       epochs = 50)
        classifier = Classifier_EEGNet(dropoutRate = 0.5,
                                       kernLength = hp.Choice('kernelLength', values=[64, 125, 250]),
                                       F1 = hp.Choice('F1', values=[16, 32, 64]),
                                       D = hp.Choice('D', values=[2, 4, 8]),
                                       F2 = hp.Choice('F2', values=[32, 64, 128, 256, 512]),
                                       norm_rate = 0.5, dropoutType = 'Dropout', epochs = 50)
    elif config['model'] == 'inception':
        classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'],
......
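The `build_model(hp)` hunk above replaces EEGNet's fixed hyperparameters with Keras Tuner `hp.Choice` search spaces, which matches the `Choice` entries in the oracle and trial JSON further down. A minimal sketch of that pattern in isolation, assuming the `Classifier_EEGNet` constructor and the `get_model()` accessor shown in the diffs:

```python
import keras_tuner as kt  # older releases import this package as `kerastuner`

def build_model(hp):
    # Each hp.Choice registers a categorical hyperparameter with the tuner;
    # on every trial the tuner substitutes one concrete value from the list.
    classifier = Classifier_EEGNet(
        dropoutRate=0.5,
        kernLength=hp.Choice('kernelLength', values=[64, 125, 250]),
        F1=hp.Choice('F1', values=[16, 32, 64]),
        D=hp.Choice('D', values=[2, 4, 8]),
        F2=hp.Choice('F2', values=[32, 64, 128, 256, 512]),
        norm_rate=0.5, dropoutType='Dropout', epochs=50)
    return classifier.get_model()
```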
@@ -24,9 +24,9 @@ def main():
    trainX = np.transpose(trainX, (0, 2, 1))
    logging.info(trainX.shape)
    # tune(trainX,trainY)
    tune(trainX,trainY)
    run(trainX,trainY)
    # run(trainX,trainY)
    # select_best_model()
    # comparison_plot()
......
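main() now calls tune() instead of run(). The tuner setup itself is not part of this diff; a minimal sketch of a tune() helper built on Keras Tuner's RandomSearch that would produce the "Oracle triggered exit" message and the oracle/trial JSON shown below (max_trials, directory, project name, validation split, and epochs are assumptions):

```python
import keras_tuner as kt  # imported as `kerastuner` in the 1.0.x releases

def tune(trainX, trainY):
    tuner = kt.RandomSearch(
        build_model,               # the build_model(hp) function shown above
        objective='val_accuracy',
        max_trials=32,             # assumed trial budget
        directory='tuner_logs',    # placeholder output directory
        project_name='eegnet')     # placeholder project name
    tuner.search(trainX, trainY, validation_split=0.2, epochs=1)  # assumed split/epochs
    return tuner.get_best_hyperparameters(num_trials=1)[0]
```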
INFO:root:Started the Logging
INFO:root:X training loaded.
INFO:root:(129, 500, 36223)
INFO:root:y training loaded.
INFO:root:(1, 36223)
INFO:root:Setting the shapes
INFO:root:(36223, 500, 129)
INFO:root:(36223, 1)
INFO:root:(36223, 129, 500)
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0132s vs `on_train_batch_end` time: 0.0234s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0259s vs `on_train_batch_end` time: 0.0563s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 64
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0133s vs `on_train_batch_end` time: 0.0259s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 512
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0189s vs `on_train_batch_end` time: 0.0449s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0305s vs `on_train_batch_end` time: 0.0863s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0198s vs `on_train_batch_end` time: 0.0311s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0180s vs `on_train_batch_end` time: 0.0471s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 512
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0139s vs `on_train_batch_end` time: 0.0258s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0210s vs `on_train_batch_end` time: 0.0454s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 512
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0213s vs `on_train_batch_end` time: 0.0339s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0138s vs `on_train_batch_end` time: 0.0257s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0145s vs `on_train_batch_end` time: 0.0305s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 512
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0145s vs `on_train_batch_end` time: 0.0293s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0357s vs `on_train_batch_end` time: 0.0614s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 64
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0221s vs `on_train_batch_end` time: 0.0433s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0305s vs `on_train_batch_end` time: 0.0852s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0373s vs `on_train_batch_end` time: 0.0666s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 64
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0133s vs `on_train_batch_end` time: 0.0221s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 128
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0197s vs `on_train_batch_end` time: 0.0304s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 64
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 2
INFO:root:--------------- F2 : 256
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0133s vs `on_train_batch_end` time: 0.0218s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 64
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0260s vs `on_train_batch_end` time: 0.0592s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 16
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 512
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0142s vs `on_train_batch_end` time: 0.0264s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 125
INFO:root:--------------- F1 : 32
INFO:root:--------------- D : 8
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0207s vs `on_train_batch_end` time: 0.0463s). Check your callbacks.
INFO:root:Starting tuning eegnet
INFO:root:Parameters...
INFO:root:--------------- chans : 129
INFO:root:--------------- samples : 500
INFO:root:--------------- dropoutRate : 0.5
INFO:root:--------------- kernLength : 250
INFO:root:--------------- F1 : 64
INFO:root:--------------- D : 4
INFO:root:--------------- F2 : 32
INFO:root:--------------- norm_rate : 0.5
WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0374s vs `on_train_batch_end` time: 0.0636s). Check your callbacks.
INFO:tensorflow:Oracle triggered exit
INFO:root:--- Runtime: 25182.93576860428 seconds ---
INFO:root:Finished Logging
{"ongoing_trials": {}, "hyperparameters": {"space": [{"class_name": "Choice", "config": {"name": "kernelLength", "default": 64, "conditions": [], "values": [64, 125, 250], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F1", "default": 16, "conditions": [], "values": [16, 32, 64], "ordered": true}}, {"class_name": "Choice", "config": {"name": "D", "default": 2, "conditions": [], "values": [2, 4, 8], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F2", "default": 32, "conditions": [], "values": [32, 64, 128, 256, 512], "ordered": true}}], "values": {"kernelLength": 64, "F1": 16, "D": 2, "F2": 32}}, "seed": 8949, "seed_state": 9097, "tried_so_far": ["af07ec38dfef80312c9bcbc59820b39c", "352f73e56e36ba64594cf995f3d98300", "328aab1a0cb83373b03d895d42c5e4a4", "602de808172746d412f75f93975eb988", "3103325bd5f9d1aa09f34d9f1a4bd622", "5178c359e07348e7967f3e8bebe8e342", "450e5a5d0bcee237d5c3cc488e3f4acb", "35b1943b108f7f583246d057e9bfa6e6", "667c1bb265849c6f631ce49e82ae5536", "8331f104a3e866c381892179ceb798df", "5fcaba007b4d99bf762ffa84aba592e7", "153cc204c46684ca8966216bbce493fb", "b635dbf7186c004dc3cda0e03221c8a7", "e6c643c1afd4b75ff7fb72b11588db86", "70ce65dfbec0e1e4d8c79ad6f3d8cd1a", "72f1d6eaf29384ad8ba969114f29be3e", "4c115ae70219de93704495e649aac54d", "52e53292c396f0b290be6c572327d649", "64cf4e118c754cf966d53c66cc2f4088", "c7928f580ebde5574e434dae18240bcf", "7652f93d1f5791295f586d12eac3ea06", "9c82b318f4cda5746b335ef45e3a8f4d", "522736851e93bd94c02af41a11bb4fdb", "cbda7a579d05745ce64726c4feb58d7f", "4e653c66995cc6859fa872c391971ca1", "fa5cd0382812ceda6dc4ca818e198dfa", "3fe0b33cb90d44b329f0954fcee4e86b", "187eaca365ab12ee71a064d87329a624", "5211f165ae8ab8fd505e25ca8cf3e7be", "3eba9377ecfd6d146201be5005ecc95d", "c71d0ad7720be0506eeb5c03c64240ec", "814e09436a8f8d818ef93214a2724c4d"]}
\ No newline at end of file
{"trial_id": "0af12e4fc7372e7de644167a9317e729", "hyperparameters": {"space": [{"class_name": "Choice", "config": {"name": "kernelLength", "default": 64, "conditions": [], "values": [64, 125, 250], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F1", "default": 16, "conditions": [], "values": [16, 32, 64], "ordered": true}}, {"class_name": "Choice", "config": {"name": "D", "default": 2, "conditions": [], "values": [2, 4, 8], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F2", "default": 32, "conditions": [], "values": [32, 64, 128, 256, 512], "ordered": true}}], "values": {"kernelLength": 125, "F1": 32, "D": 8, "F2": 256}}, "metrics": {"metrics": {"loss": {"direction": "min", "observations": [{"value": [0.5376172065734863], "step": 0}]}, "accuracy": {"direction": "max", "observations": [{"value": [0.8668299913406372], "step": 0}]}, "val_loss": {"direction": "min", "observations": [{"value": [0.5643748044967651], "step": 0}]}, "val_accuracy": {"direction": "max", "observations": [{"value": [0.8614216446876526], "step": 0}]}}}, "score": 0.8614216446876526, "best_step": 0, "status": "COMPLETED"}
\ No newline at end of file
{"trial_id": "171151ea18531af5a7e1d18bc74ed931", "hyperparameters": {"space": [{"class_name": "Choice", "config": {"name": "kernelLength", "default": 64, "conditions": [], "values": [64, 125, 250], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F1", "default": 16, "conditions": [], "values": [16, 32, 64], "ordered": true}}, {"class_name": "Choice", "config": {"name": "D", "default": 2, "conditions": [], "values": [2, 4, 8], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F2", "default": 32, "conditions": [], "values": [32, 64, 128, 256, 512], "ordered": true}}], "values": {"kernelLength": 64, "F1": 16, "D": 8, "F2": 256}}, "metrics": {"metrics": {"loss": {"direction": "min", "observations": [{"value": [0.5394954681396484], "step": 0}]}, "accuracy": {"direction": "max", "observations": [{"value": [0.8587894439697266], "step": 0}]}, "val_loss": {"direction": "min", "observations": [{"value": [0.6003975868225098], "step": 0}]}, "val_accuracy": {"direction": "max", "observations": [{"value": [0.8576949834823608], "step": 0}]}}}, "score": 0.8576949834823608, "best_step": 0, "status": "COMPLETED"}
\ No newline at end of file
{"trial_id": "17799440dd1d55d2aff7690f655a8a34", "hyperparameters": {"space": [{"class_name": "Choice", "config": {"name": "kernelLength", "default": 64, "conditions": [], "values": [64, 125, 250], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F1", "default": 16, "conditions": [], "values": [16, 32, 64], "ordered": true}}, {"class_name": "Choice", "config": {"name": "D", "default": 2, "conditions": [], "values": [2, 4, 8], "ordered": true}}, {"class_name": "Choice", "config": {"name": "F2", "default": 32, "conditions": [], "values": [32, 64, 128, 256, 512], "ordered": true}}], "values": {"kernelLength": 125, "F1": 64, "D": 2, "F2": 64}}, "metrics": {"metrics": {"loss": {"direction": "min", "observations": [{"value": [0.5350640416145325], "step": 0}]}, "accuracy": {"direction": "max", "observations": [{"value": [0.8492304682731628], "step": 0}]}, "val_loss": {"direction": "min", "observations": [{"value": [0.6023575663566589], "step": 0}]}, "val_accuracy": {"direction": "max", "observations": [{"value": [0.8590752482414246], "step": 0}]}}}, "score": 0.8590752482414246, "best_step": 0, "status": "COMPLETED"}
\ No newline at end of file
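Each trial JSON above stores the sampled hyperparameter values plus a `score` taken from `val_accuracy`. A minimal sketch, assuming a hypothetical directory layout of such trial.json files, of picking out the best completed trial:

```python
import glob
import json

def best_trial(pattern='tuner_logs/eegnet/trial_*/trial.json'):  # placeholder path pattern
    """Return (score, hyperparameter values) of the highest-scoring completed trial."""
    best = None
    for path in glob.glob(pattern):
        with open(path) as f:
            trial = json.load(f)
        if trial.get('status') != 'COMPLETED':
            continue
        score = trial['score']                       # val_accuracy, per the snippets above
        values = trial['hyperparameters']['values']  # e.g. {'kernelLength': 125, 'F1': 32, ...}
        if best is None or score > best[0]:
            best = (score, values)
    return best
```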