Commit 7b1c42d1 authored by okiss

memory fixed in ensemble

parents c5fa3907 427033a3
......@@ -14,7 +14,7 @@ class Classifier_INCEPTION(ConvNet):
Daniel F. Schmidt, Jonathan Weber, Geoffrey I. Webb, Lhassane Idoumghar, Pierre-Alain Muller, François Petitjean
"""
def __init__(self, input_shape, kernel_size=40, epochs = 1, nb_filters=32, verbose=True, batch_size=64, use_residual=True, depth=10, bottleneck_size=32):
def __init__(self, input_shape, kernel_size=40, epochs = 15, nb_filters=32, verbose=True, batch_size=64, use_residual=True, depth=10, bottleneck_size=32):
self.bottleneck_size = bottleneck_size
super(Classifier_INCEPTION, self).__init__(input_shape, kernel_size=kernel_size, epochs=epochs, nb_filters=nb_filters, verbose=verbose, batch_size=batch_size, use_residual=use_residual, depth=depth)
logging.info('--------------- bottleneck_size : ' + str(self.bottleneck_size))
......
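For context, a minimal usage sketch of the classifier this hunk changes, assuming the constructor signature shown above; the input_shape value below is a placeholder, in practice it is read from config['inception']['input_shape']:

# Sketch only: input_shape is a hypothetical (time_steps, channels) pair, not taken from this diff.
classifier = Classifier_INCEPTION(
    input_shape=(500, 129),   # placeholder shape
    epochs=15,                # new default introduced by this commit
    kernel_size=40,
    nb_filters=32,
    use_residual=True,
    depth=10,
    bottleneck_size=32,
)
hist, pred_ensemble = classifier.fit(trainX, trainY)   # fit() returns the history and ensemble predictions, as used in run() below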
......@@ -42,7 +42,7 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
"""
# Choosing model
config['model'] = 'deepeye'
config['model'] = 'inception'
config['downsampled'] = False
config['split'] = False
config['cluster'] = clustering()
......
......@@ -35,7 +35,8 @@ def run(trainX, trainY):
elif config['model'] == 'eegnet':
classifier = Classifier_EEGNet()
elif config['model'] == 'inception':
classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'])
classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=16, depth=12, bottleneck_size=16)
elif config['model'] == 'xception' :
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'])
elif config['model'] == 'deepeye-rnn':
......@@ -44,25 +45,25 @@ def run(trainX, trainY):
logging.info('Cannot start the program. Please choose one model in the config.py file')
hist, pred_ensemble = classifier.fit(trainX,trainY)
if i == 0:
pred = pred_ensemble.predhis
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
pred[j] = (np.array(pred[j])+np.array(pred_epoch))
# if i == 0:
# pred = pred_ensemble.predhis
# else:
# for j, pred_epoch in enumerate(pred_ensemble.predhis):
# pred[j] = (np.array(pred[j])+np.array(pred_epoch))
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch/config['ensemble']).tolist()
loss.append(bce(pred_ensemble.targets[j],pred_epoch).numpy())
pred_epoch = np.round(pred_epoch,0)
accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets[j]).reshape(-1)-1)**2))
# for j, pred_epoch in enumerate(pred):
# pred_epoch = (pred_epoch/config['ensemble']).tolist()
# loss.append(bce(pred_ensemble.targets[j],pred_epoch).numpy())
# pred_epoch = np.round(pred_epoch,0)
# accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets[j]).reshape(-1)-1)**2))
if config['ensemble']>1:
config['model']+='_ensemble'
if config['split']:
config['model'] = config['model'] + '_cluster'
# if config['ensemble']>1:
# config['model']+='_ensemble'
# if config['split']:
# config['model'] = config['model'] + '_cluster'
hist.history['val_loss'] = loss
hist.history['val_accuracy'] = accuracy
# hist.history['val_loss'] = loss
# hist.history['val_accuracy'] = accuracy
plot_loss(hist, config['model_dir'], config['model'], val = True)
plot_acc(hist, config['model_dir'], config['model'], val = True)
save_logs(hist, config['model_dir'], config['model'], pytorch = False)
save_logs(hist, config['model_dir'], config['model'], pytorch = False)
\ No newline at end of file
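The block commented out above accumulated each ensemble member's per-epoch predictions, averaged them over config['ensemble'], and derived validation loss and accuracy from the average. A self-contained sketch of that logic, assuming pred_ensemble.predhis is a list of per-epoch prediction arrays and pred_ensemble.targets holds the matching binary labels:

import numpy as np
import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy()

def ensemble_metrics(pred_sum, targets, n_members):
    # pred_sum[j] is the element-wise sum of all members' predictions for logged epoch j.
    loss, accuracy = [], []
    for j, pred_epoch in enumerate(pred_sum):
        avg = np.array(pred_epoch) / n_members        # mean prediction over the ensemble
        loss.append(bce(targets[j], avg).numpy())     # binary cross-entropy against the targets
        rounded = np.round(avg).reshape(-1)           # threshold at 0.5
        t = np.array(targets[j]).reshape(-1)
        # The original (rounded + t - 1)**2 equals 1 exactly when prediction and label agree,
        # so its mean is the accuracy; written here as an explicit comparison.
        accuracy.append(np.mean(rounded == t))
    return loss, accuracy

In the pre-change run(), these lists overwrote hist.history['val_loss'] and hist.history['val_accuracy'] before plotting; with the block commented out, those assignments are disabled as well.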
......@@ -56,13 +56,12 @@ def build_model(hp):
elif config['model'] == 'xception':
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'],
epochs=15, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3)
epochs=2, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3)
)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
else:
......@@ -85,4 +84,3 @@ def tune(trainX, trainY):
X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
tuner.search(X_train, y_train, epochs=15, validation_data=(X_val, y_val), verbose=2)
tuner.results_summary()
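The tune() hunk above relies on a tuner object defined earlier in the file and not shown in this diff. A minimal sketch of how build_model could be wired into keras-tuner, assuming a RandomSearch tuner; the repo's actual tuner class, trial budget, and output directory may differ:

import keras_tuner as kt

# Hypothetical construction; only build_model and the search/summary calls are confirmed by the diff.
tuner = kt.RandomSearch(
    build_model,                  # consumes the hp.Choice / hp.Int definitions shown above
    objective='val_accuracy',
    max_trials=10,                # assumption: the real trial budget is not visible here
    directory='tuner_logs',       # placeholder output directory
    project_name='xception_tuning',
)
# tune(trainX, trainY) then splits the data and calls tuner.search(...) exactly as in the hunk above.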